) AS
BEGIN
SET NOCOUNT ON

-- Exception handling variable declarations
DECLARE @ErrorMessage NVARCHAR(200),
        @ErrorNumber INT,
        @ErrorSeverity INT,
        @ErrorState INT,
        @ErrorProcedure NVARCHAR(50),
        @ErrorLine INT,
        @ErrorDesc NVARCHAR(100)

DECLARE @XMLPayment INT

BEGIN TRY
    IF @XMLParams IS NOT NULL
    BEGIN
        SET @ErrorDesc = 'Error occurred while inserting into TIX_PAYMENT_SCHEDULE from XML'

        INSERT INTO TIX_PAYMENT_SCHEDULE
        (
            OwedAmountId, ProposalId, BrandId, DueDate, OverdueDate,
            CreatedDateTime, LastUpdatedDateTime, ExpectedAmount,
            ActualAmountReceived, ScheduleBatchJournalId, RuleId,
            TransactionStatusId, ActionId, IsLate, IsPaymentReceived,
            IsValidSchedule,
            -- Added by DC : 119
            IsCatchupBalanced, CatchupBalanceIdentifier, HasModified
        )
        SELECT
            Main.ELEMENT.value('(OwedAmountId)[1]', 'int') AS OwedAmountId,
            Main.ELEMENT.value('(ProposalId)[1]', 'int') AS ProposalId,
            Main.ELEMENT.value('(BrandId)[1]', 'int') AS BrandId,
            CONVERT(datetime, Main.ELEMENT.value('(DueDate)[1]', 'varchar(100)')) AS DueDate,
            CONVERT(datetime, Main.ELEMENT.value('(OverdueDate)[1]', 'varchar(100)')) AS OverdueDate,
            @ToDate AS CreatedDateTime,
            @ToDate AS LastUpdatedDateTime,
            CONVERT(decimal(18,2), Main.ELEMENT.value('(ExpectedAmount)[1]', 'varchar(100)')) AS ExpectedAmount,
            CONVERT(decimal(18,2), Main.ELEMENT.value('(ActualAmountReceived)[1]', 'varchar(100)')) AS ActualAmountReceived,
            Main.ELEMENT.value('(ScheduleBatchJournalId)[1]', 'bigint') AS ScheduleBatchJournalId,
            Main.ELEMENT.value('(RuleId)[1]', 'int') AS RuleId,
            Main.ELEMENT.value('(TransactionStatusId)[1]', 'int') AS TransactionStatusId,
            Main.ELEMENT.value('(ActionId)[1]', 'int') AS ActionId,
            Main.ELEMENT.value('(IsLate)[1]', 'char(1)') AS IsLate,
            Main.ELEMENT.value('(IsPaymentReceived)[1]', 'char(1)') AS IsPaymentReceived,
            Main.ELEMENT.value('(IsValidSchedule)[1]', 'char(1)') AS IsValidSchedule,
            -- Added by DC for 119
            Main.ELEMENT.value('(IsCatchupBalanced)[1]', 'char(1)') AS IsCatchupBalanced,
            Main.ELEMENT.value('(CatchupBalanceIdentifier)[1]', 'nvarchar(1000)') AS CatchupBalanceIdentifier,
            @HasModified
        FROM @XMLParams.nodes('(/ROOT/DATA)') AS Main(ELEMENT)
    END
END TRY
BEGIN CATCH
    SELECT @ErrorMessage = @ErrorDesc + CHAR(13) + ERROR_MESSAGE(),
           @ErrorSeverity = ERROR_SEVERITY(),
           @ErrorState = ERROR_STATE(),
           @ErrorNumber = ERROR_NUMBER(),
           @ErrorProcedure = ERROR_PROCEDURE(),
           @ErrorLine = ERROR_LINE()

    -- RAISERROR takes message, severity and state; the remaining values belong
    -- in the message text if they need to be reported
    RAISERROR(@ErrorMessage, @ErrorSeverity, @ErrorState)
END CATCH
END
BEGIN TRY
    -- Exception handling
    SET @ErrorDesc = 'Error occurred while fetching records from TIX_PAYMENT_SCHEDULE'

    SELECT PaymentScheduleId, OwedAmountId, ProposalId, DueDate, OverdueDate,
           ExpectedAmount, TransactionStatusId, IsPaymentReceived, IsLate, ActionId,
           ActualAmountReceived, IsValidSchedule, BrandId, CaseScheduleId, ReasonId,
           Comments, NoOfDays, ActionDate, IsCatchupBalanced, CatchupBalanceIdentifier,
           HasModified
    FROM TIX_PAYMENT_SCHEDULE WITH (NOLOCK)
    WHERE DueDate <= @ToDate
      AND IsValidSchedule = @IsValidSchedule

    SELECT DISTINCT OwedAmountId, ProposalId, BrandId
    FROM TIX_PAYMENT_SCHEDULE WITH (NOLOCK)
    WHERE DueDate <= @ToDate
      AND IsValidSchedule = @IsValidSchedule
    ORDER BY OwedAmountId, ProposalId, BrandId ASC

    SELECT DISTINCT ProposalId
    FROM TIX_PAYMENT_SCHEDULE WITH (NOLOCK)
    WHERE DueDate <= @ToDate
      AND IsValidSchedule = @IsValidSchedule
    ORDER BY ProposalId ASC
END TRY
BEGIN CATCH
    SELECT @ErrorMessage = @ErrorDesc + CHAR(13) + ERROR_MESSAGE(),
           @ErrorNumber = ERROR_NUMBER(),
           @ErrorState = ERROR_STATE(),
           @ErrorProcedure = ERROR_PROCEDURE(),
           @ErrorLine = ERROR_LINE(),
           @ErrorSeverity = ERROR_SEVERITY()
Hi all,I was given a task to create a houseHolding logic under a table thathave millions records.first let me explain what is a house holding:let's say I have 2 records that have the same phone number, that meanthat both records are under the same household, but this can get morecomplicatedthis article explain ithttp://www.teradata.com/t/page/115924/index.htmlif anyone worked with household he knows that you need to scan thetable many time to get all the house holds, I used a dts to do it.I tested the dts on 11 records like the article did and that workgreat, but once I went to million records each loop is taking me 2 houror so....a and I have no idea how how many loops I will have to do.if anyone out there worked with household queries and used sql, yourimput would help me allotthanks.
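A minimal set-based sketch of the collapse loop, in case it helps. It assumes a table Customers(CustomerId, Phone, Address, HouseholdId); the names are illustrative, not from the post. Each record starts as its own household, and every pass merges records sharing a link value down to the smallest HouseholdId, so the number of passes is roughly the length of the longest chain rather than one loop per household:

    -- Seed: every record starts as its own household
    UPDATE Customers SET HouseholdId = CustomerId

    DECLARE @changed INT
    SET @changed = 1

    WHILE @changed > 0
    BEGIN
        SET @changed = 0

        -- Collapse records sharing a phone number to the smallest HouseholdId
        UPDATE c
        SET    c.HouseholdId = m.MinHH
        FROM   Customers c
               JOIN (SELECT Phone, MIN(HouseholdId) AS MinHH
                     FROM Customers GROUP BY Phone) m
                 ON c.Phone = m.Phone
        WHERE  c.HouseholdId > m.MinHH
        SET @changed = @changed + @@ROWCOUNT

        -- Collapse records sharing an address the same way
        UPDATE c
        SET    c.HouseholdId = m.MinHH
        FROM   Customers c
               JOIN (SELECT Address, MIN(HouseholdId) AS MinHH
                     FROM Customers GROUP BY Address) m
                 ON c.Address = m.Address
        WHERE  c.HouseholdId > m.MinHH
        SET @changed = @changed + @@ROWCOUNT
    END

On millions of rows each pass is a single indexed UPDATE (index Phone, Address and HouseholdId), which should be far cheaper than a row-at-a-time DTS loop.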
While doing a migration using cursors over the sample data given below, it takes hours to complete the process. So I want to know whether there is any way I can do it in a simple query.
I have only the Amount value; the Balance has to be a calculated value based on CalType, and at the same time the Balance has to be reset to 0 when AcNo changes.
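If the target server is SQL Server 2012 or later, a windowed running SUM does this without a cursor. A sketch assuming columns like Txn(AcNo, TxnId, CalType, Amount), where CalType decides the sign (the names and the 'D' convention are assumptions):

    SELECT AcNo, TxnId, CalType, Amount,
           SUM(CASE WHEN CalType = 'D' THEN -Amount ELSE Amount END)
               OVER (PARTITION BY AcNo            -- restart at 0 for each account
                     ORDER BY TxnId
                     ROWS UNBOUNDED PRECEDING) AS Balance
    FROM   dbo.Txn
    ORDER BY AcNo, TxnId

PARTITION BY AcNo is what gives the reset-to-zero behavior when the account number changes; on older versions the same result needs a correlated subquery, but still no cursor.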
Hi, there are about 30 million records on my MSSQL server and I want to access 2 million of them at one time. However, when I try to access them with a SQL command I get a timeout error. I want to select the first 100 records, then the next 100, and so on. How can I do this? For example: select * from tbl_Customer where name = @name_ -> timeout error. Someone said that I can solve this problem with cursors, but I can't find enough articles. Thanks...
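Rather than cursors, keyset paging is the usual fix: fetch a small batch ordered by a key, remember the last key seen, and resume from there. A sketch assuming an indexed key column CustomerId (an assumption; any unique indexed column works):

    DECLARE @lastId INT
    SET @lastId = 0          -- resume point; persist between batches

    SELECT TOP 100 *
    FROM   tbl_Customer
    WHERE  name = @name_
      AND  CustomerId > @lastId
    ORDER BY CustomerId

    -- take MAX(CustomerId) from the batch as the next @lastId and repeat

An index on (name, CustomerId) keeps each batch a cheap seek, so no single statement runs long enough to time out.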
I have a query to delete millions of records. I want to delete in batches of 1000. My SELECT join statement returns millions of records, so this takes a lot of time. How do I select 1000 records, delete everything that is not in those records, then loop without selecting the same records again? Here is what I have:
DECLARE @i INT
WHILE (1 = 1)
BEGIN
    BEGIN TRAN
    DELETE TOP (1000) FROM dbo.ABC123
    WHERE SUBSTRING(dumbdumb, 1, 8) NOT IN
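For what it's worth, a hedged sketch of how that loop usually gets finished: materialize the keep-list from the (truncated) NOT IN subquery into a work table once, then delete in batches until a pass removes nothing. The #keep table is my assumption, standing in for whatever the missing subquery selects:

    WHILE (1 = 1)
    BEGIN
        BEGIN TRAN

        DELETE TOP (1000) FROM dbo.ABC123
        WHERE SUBSTRING(dumbdumb, 1, 8) NOT IN (SELECT k FROM #keep)

        IF @@ROWCOUNT = 0
        BEGIN
            COMMIT TRAN
            BREAK       -- nothing left to delete
        END

        COMMIT TRAN
    END

Because rows are physically gone after each batch, the loop never revisits the same records, and materializing the keep-list once avoids re-running the expensive join on every pass.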
The issue: SQL 2K. I have to keep the data from the last 3 months in the database, and every day I load 2 million records. So every day I also have to export (to another database acting as a historical data container) and delete the 2 million records inserted 3 months + one day ago. The main problem is that the delete operation takes a while, since it involves the transaction log. The questions are: 1) How can I improve this export/delete operation? 2) If we decide to migrate to SQL 2005, can we use some feature such as partitioning to solve the problem? In Oracle I can use the "truncate partition" statement, but from what I'm reading it can't be done in SQL 2005. We could think of creating a partition on the last three months to split the data, but can the partitioning function be dynamic, or contain a function that says "last 3 months"? I don't think so. Can you help us? Thank you. Mastino
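On the SQL 2005 question: there is indeed no TRUNCATE PARTITION, but the "sliding window" idiom gets the same effect: SWITCH the oldest partition out to an empty staging table (a metadata-only operation) and truncate that. A sketch under assumptions, with illustrative names and a daily partition on a LoadDate column:

    -- One-time setup: range partitioning on the load date
    CREATE PARTITION FUNCTION pfLoadDate (datetime)
        AS RANGE RIGHT FOR VALUES ('20070101', '20070102', '20070103')
    CREATE PARTITION SCHEME psLoadDate
        AS PARTITION pfLoadDate ALL TO ([PRIMARY])

    -- Daily purge job:
    -- Calls_Staging must be empty, structurally identical, on the same filegroup
    ALTER TABLE dbo.Calls SWITCH PARTITION 1 TO dbo.Calls_Staging
    TRUNCATE TABLE dbo.Calls_Staging          -- near-instant, minimally logged

    -- Retire the now-empty oldest boundary and open tomorrow's
    ALTER PARTITION FUNCTION pfLoadDate() MERGE RANGE ('20070101')
    ALTER PARTITION SCHEME psLoadDate NEXT USED [PRIMARY]
    ALTER PARTITION FUNCTION pfLoadDate() SPLIT RANGE ('20070104')

The partition function itself is static, as suspected; the daily MERGE/SPLIT in a scheduled job is what makes the window slide.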
I'm trying to compare about 28 million records (270-byte row length) from tables A and B using the Lookup task as described in this forum. The process works fine with about two million records or so on my desktop (P4 3.39GHz, 1.5 GB RAM), but hangs with the full amount of data I'm trying to process. I tried using full and partial caching, but to no avail. I'm thinking this is a hardware resource problem. So, does anyone have any recommendations on the hardware needed for this kind of operation, or other suggestions? Thanks in advance...
Hello all, quick question: I'm looking for the most efficient way to extract data daily from a table with some 9.5 million records and growing. These are transaction records, and ideally I would like to bring over the last day's transactions and add them to my existing table. I cannot use the transaction date, as sometimes we have to operate in an "offline" mode where the records are brought over some time later. This could be days or, unfortunately, a week or more. There are some 30 fields in the transaction table, so is there a more efficient way to do this than simply creating a concatenated key? Would it be more efficient to drop and recreate the table daily? That sounds extreme, so I wanted to get a few ideas.
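One set-based option, sketched under assumptions: if the transaction table has a natural composite business key (say StoreId, RegisterId, TxnNumber; names invented here), there is no need for a concatenated key column; just insert whatever the target has not seen yet:

    INSERT INTO dbo.TransArchive (StoreId, RegisterId, TxnNumber /* , ...remaining fields */)
    SELECT s.StoreId, s.RegisterId, s.TxnNumber /* , ... */
    FROM   dbo.TransSource s
    WHERE  NOT EXISTS (SELECT 1
                       FROM dbo.TransArchive a
                       WHERE a.StoreId    = s.StoreId
                         AND a.RegisterId = s.RegisterId
                         AND a.TxnNumber  = s.TxnNumber)

With a composite index on those three columns on the archive side, the NOT EXISTS probe stays a seek even at 9.5 million rows, and it also picks up stragglers that arrive days late, without dropping and recreating anything.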
I want to delete 30-40 million rows from a transactional table. What's the fastest way to delete these rows? Just deleting 300,000 rows takes 30 minutes, and I don't want to truncate the table.
Hi, my full-text search over 2 million records is taking a long time to show results. I have created the full-text catalog on a RAM drive to make retrieval faster, but it still takes more than 1 minute to find a matching pattern. I am using SQL Server 2005. I have 2 columns (id, text) in my table.
This is my unique index script
USE [SAMPLE]
GO
CREATE UNIQUE NONCLUSTERED INDEX [ui_productid] ON [dbo].[Products]
(
    [id] ASC
) WITH (SORT_IN_TEMPDB = OFF, DROP_EXISTING = OFF, IGNORE_DUP_KEY = OFF, ONLINE = OFF) ON [PRIMARY]
This is my primary key index script:
USE [SAMPLE]
GO
ALTER TABLE [dbo].[Products] ADD CONSTRAINT [PK_Products] PRIMARY KEY CLUSTERED
(
    [id] ASC
) WITH (SORT_IN_TEMPDB = OFF, IGNORE_DUP_KEY = OFF, ONLINE = OFF) ON [PRIMARY]
This is my query..
SELECT D.[id], D.productname FROM dbo.Products AS D WHERE CONTAINS(productname, 'ford')
What should I do to get the result in 3-4 seconds?
Hi all, hope there is a quick fix for this: I am inserting data from one table to another on the same DB. The insert is pretty simple, as in:

insert into datatable(field1, field2, field3)
select a1, a2, a3 from temptable...

This inserts about 4 million rows in one go, and since I had the 'cannot obtain lock resources' problem, several methods were suggested by some web sites:
1) split the insert into smaller chunks (I have no idea how I can split an insert so it inserts only n records at a time);
2) use WAITFOR - which I did, but it did not fix the error;
3) use BULK INSERT (in T-SQL) - I don't know how to do this.
As I see it, I am simply trying to move data from one table to another (of course, lots of data) in SQL Server 2000, and I don't see one simple solution to the locking problem. Any ideas on how best I can do this will save my day! Thanks all.
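For option 1, a SQL 2000-compatible sketch of chunking with SET ROWCOUNT. It assumes a1 maps to a unique field1, so already-copied rows can be recognized; if no such key exists, copy a key range per pass instead:

    SET ROWCOUNT 50000            -- cap each INSERT at 50k rows
    WHILE 1 = 1
    BEGIN
        INSERT INTO datatable (field1, field2, field3)
        SELECT a1, a2, a3
        FROM   temptable t
        WHERE  NOT EXISTS (SELECT 1 FROM datatable d WHERE d.field1 = t.a1)

        IF @@ROWCOUNT = 0 BREAK   -- everything copied
    END
    SET ROWCOUNT 0                -- always reset

Each pass holds locks for at most 50,000 rows, which should stay well under the lock-resource limit that the single 4-million-row statement hits; an index on datatable(field1) keeps the NOT EXISTS probe cheap.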
I'm trying to optimize a few batch import procedures we use in our processes.
It currently works like this:
1) Cursor loop cycles through all data to be imported from IMPORT table
2) For every record there is an attempted insert to the PROD table in a TRY-CATCH check, to see whether the record would pass all the primary key and foreign key constraints in the PROD table
3) Only those that pass the TRY-CATCH check gets imported into PROD table
4) Every row gets logged into a separate LOG table, either with a comment like "Import OK" or "Error: foreign key violation in field 'my_id'"
The thing is, the procedure runs fine when I'm importing several thousand records, but when it comes to hundreds of thousands, speed becomes an issue: I currently get 20 records per second and slowing...
There is no other code in that procedure, no queries. Just the Cursor cycle and the try-catch check.
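For comparison, a set-based sketch of the same logic (all table and column names here are assumptions, since the post doesn't show them): classify every staged row with joins in one pass, log the verdicts, then insert only the clean rows:

    -- 1) One pass to classify and log every staged row
    INSERT INTO dbo.LOG_TABLE (import_id, result)
    SELECT i.import_id,
           CASE WHEN p.import_id IS NOT NULL THEN 'Error: primary key violation'
                WHEN f.my_id IS NULL         THEN 'Error: foreign key violation in field ''my_id'''
                ELSE 'Import OK'
           END
    FROM   dbo.IMPORT i
           LEFT JOIN dbo.PROD      p ON p.import_id = i.import_id  -- PK already taken?
           LEFT JOIN dbo.FK_PARENT f ON f.my_id     = i.my_id      -- FK target exists?

    -- 2) One pass to import everything that logged clean
    INSERT INTO dbo.PROD (import_id, my_id /* , ... */)
    SELECT i.import_id, i.my_id
    FROM   dbo.IMPORT i
           JOIN dbo.LOG_TABLE l ON l.import_id = i.import_id
    WHERE  l.result = 'Import OK'

Two scans replace hundreds of thousands of TRY-CATCH round trips; raising and catching an exception per bad row is what makes the cursor version crawl.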
I have one stored procedure for insert, update and delete operations, and I need the insert, update and delete operations logged automatically. How can I set up an auto-logging mechanism, and how can I access the logs in C#? Thanks
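One common pattern on SQL 2005+, sketched with assumed table and column names: have the procedure itself write an audit row per affected record via the OUTPUT clause, then read the audit table from C# like any other table:

    CREATE TABLE dbo.AuditLog (
        AuditId   INT IDENTITY PRIMARY KEY,
        Operation VARCHAR(10),
        KeyValue  INT,
        LoggedAt  DATETIME NOT NULL DEFAULT GETDATE()
    )

    -- Inside the procedure, e.g. the UPDATE branch:
    UPDATE dbo.MyTable
    SET    SomeColumn = @NewValue
    OUTPUT 'UPDATE', inserted.Id INTO dbo.AuditLog (Operation, KeyValue)
    WHERE  Id = @Id

    -- The DELETE branch logs from the deleted pseudo-table instead:
    DELETE dbo.MyTable
    OUTPUT 'DELETE', deleted.Id INTO dbo.AuditLog (Operation, KeyValue)
    WHERE  Id = @Id

On the C# side an ordinary SqlCommand with SELECT * FROM dbo.AuditLog and a SqlDataReader is all that's needed; nothing special about reading a log table.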
I am trying to transfer 90 million records (250-byte row length) from Oracle 8i to SQL Server 2000 using DTS, and it is taking 2 seconds to transfer 1000 records. Is there any way I can transfer 90 million records faster? At this rate it will take more than 10 hours.
USE [Testing]
GO
/****** Object: Table [dbo].[Testing] Script Date: 4/25/2014 11:08:18 AM ******/
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
[Code] ....
It seems to work fine with one million records.
Each primary key is unique, but the begindate is non-unique, and I guess even if I use datetime2 and add nanoseconds, from what I have read there is a chance I could get a duplicate datetime, since the date is imported via XML from multiple sources.
I'm running an ISP database in SQL 6.5 which has a table 'calls'. When the new month starts I create a new table with the same fields, move the previous month's data into that table, and delete it from calls, so 'calls' holds only the current month's data. For example, at the start of November 2003 I ran these queries:

Create Table Oct2003Calls {................................}

/* Now insert data of October into the new table */
INSERT Oct2003Calls
SELECT *
FROM calls
WHERE calldate < '11/1/03'

/* Finally delete October data from the calls table */
DELETE FROM calls
WHERE calldate < '11/1/03'

The problem is that while the insert query takes about 2 minutes to execute, the delete query takes over 10 minutes to affect the same number of rows. Why is that? This causes problems because user authentication stops while this query is running, which means users can't connect to the internet.
It seems inserting records takes a relatively long time. My guess is that it needs time to allocate disk space for the extra space needed. Assuming this is true, are there any DB settings that allow auto space allocation in bigger chunks? I am looking for something like a "DB growth factor" or "table growth factor".
Could somebody on the Microsoft database team explain this behavior? The problem is predominant when the cardinality of a column is very high and a WHERE clause is specified on that column. Both use the same index.
I have a table that is being used to log track plays on our website.
Here's the table:
CREATE TABLE [dbo].[Music_BandTrackPlays](
    [ListenDate] [datetime] NOT NULL DEFAULT (getdate()),
    [TrackId] [int] NOT NULL,
    [IPAddress] [varchar](20)
) ON [PRIMARY]
There's a CLUSTERED INDEX on ListenDate ASC and a NONCLUSTERED INDEX on TrackId.
I have a TRIGGER on the Music_BandTrackPlays table that looks like the following:
CREATE TRIGGER [trig_Increment_Music_BandTrackPlays_PlayCount]
ON [dbo].[Music_BandTrackPlays]
AFTER INSERT
AS
UPDATE Music_BandTracks
SET    Music_BandTracks.PlayCount = Music_BandTracks.PlayCount + TP.PlayCount
FROM   (SELECT TrackId, COUNT(*) AS PlayCount
        FROM inserted
        GROUP BY TrackId) AS TP
WHERE  Music_BandTracks.TrackId = TP.TrackId
When a simple INSERT statement is done on the Music_BandTrackPlays table, it can take quite a long time. When I remove the TRIGGER the INSERTs are immediate. The execution plan for the TRIGGER shows that an 'Inserted Scan' is taking up most of the resources.
How exactly is the pseudo 'inserted' table formed?
For now, I think the easiest thing to do is update my logging page so it performs 2 queries: one to UPDATE the Music_BandTracks table and increment the counter, and a separate INSERT into the Music_BandTrackPlays table.
I'm ok with that solution but I would really like to understand why the TRIGGER is taking so long. The 'inserted' pseudo table will be 1 row 99% of the time. Does SQL Server perform a table scan on all 20 million rows in order to determine what's new and put it in the inserted pseudo table?
I have been trying to solve this locking problem for the past couple of days. Please help me!!
Scenario:
--------------
I have an SSIS package with 2 data flow tasks. The 1st data flow task deletes records from 5 tables, and the 2nd data flow task should insert records into 1 of the five tables after the success of the 1st. This scenario runs in a transaction.
In this scenario the 2nd data flow task hangs at runtime and never completes. With the sp_who2 command I can see that there is an intent shared lock (LK_M_IS) on the table and the status is SUSPENDED.
I don't know how to get out of this locking. Please help.
I have a stored procedure containing a query that takes 0 seconds when run on its own, yet the stored procedure takes 16 seconds. From what I can tell they should be the same.
It doesn't recompile when I run the stored procedure; I checked that.
Before implementing a memory-based bulk copy insert with the IRowsetFastLoad interface of the SQL Server 2005 OLE DB provider, I want to know some considerations:
- Performance: how it compares with T-SQL's BULK INSERT and the bcp utility.
- SQL Server resource usage: how a memory-based bulk copy affects server resources while running.
- Server-side behavior: when the server is busy, does delayed update mean IRowsetFastLoad::Commit(true) can still insert right away?
- Row count: is there a limit on the number of rows that can be inserted with IRowsetFastLoad::InsertRow() before IRowsetFastLoad::Commit?
I am using VS2005 (VB) to develop a PPC WM5.0 program, and I am using SQLCE 3.0. My PPC hardware runs at 400MHz.
The question is: each time after the program starts, when it tries to insert the first record into the sdf database, it takes a long time. Does anyone know why, and how can I fix it?
I load the whole database into a dataset when the program starts, do all the "Insert", "Update" and "Delete" operations in this dataset, and fill it back into the database after each action.
cn.Open()
sda = New SqlCeDataAdapter(SQL, cn)   'SQL = Select * From Table
scb = New SqlCeCommandBuilder(sda)
sda.Update(dataset)
cn.Close()
I checked sda.Update(); it normally takes about 0.08s to fill one record into the database. But:
1. Start the PPC Program
2. Load DB into dataset
3. Create a ONE new record in dataset
4. Fill back to DB
When I take these four steps each time, the filling time is almost 1s or even more!
Actually, 0.08s is just the normal case. Sometimes it still takes over 1s to fill back a dataset that has only one inserted record while the program is running. (Even when all inserted records are exactly the same in data, just different in the integer key.)
However, when I give up the dataset and use the following code:

cn.Open()
Dim cmd As New SqlCeCommand(SQL, cn)  ' I have built the insert SQL before (Insert Into Table values(XXXXXXXXXXXXXXX All field)

I found that the first inserted record still takes more time, but only about 0.2s, and the normal insert time is around 0.02s. It is 4 times faster!
We need to select rows from the database that have been recently inserted/updated. We have a main primary table (COMMIT_TEST) and a second update table (COMMIT_TEST_UPDATE). The update table contains the primary key and a LAST_UPDATE field which is a datetime (to tell us when an update occurred). Triggers on the primary table are used to populate the update table.
If we insert or update the primary table in a transaction, we would expect the datetime of the insert/update to be the commit time; however, it seems that the insert/update statement is cached and getdate() is evaluated when the statement runs rather than at the commit. This causes problems: we select rows based on LAST_UPDATE, and a commit may occur later while the earlier insert timestamp is saved to the database, so we miss that update.
We would like to know if there is any way to tell SQL Server not to evaluate the function getdate() until the commit, or any other way to get the commit to create the correct timestamp.
We are using default isolation level. We have tried using getdate(), current_timestamp and even {fn Now()} with the same results. SQL Queries that reproduce the problem are provided below:
/* Different functions to get the current timestamp; all have been tested to produce the same results */
/*
SELECT GETDATE()
GO
SELECT CURRENT_TIMESTAMP
GO
SELECT {fn Now()}
GO
*/

/* Use these statements to delete the tables to allow recreate of the tables */
/*
DROP TABLE COMMIT_TEST
DROP TABLE COMMIT_TEST_UPDATE
*/

/* Create a primary table and an UPDATE table to store the date/time when the primary table is modified */
CREATE TABLE dbo.COMMIT_TEST (PKEY int PRIMARY KEY, timestamp) /* ROW_VERSION rowversion */
GO
CREATE TABLE dbo.COMMIT_TEST_UPDATE (PKEY int PRIMARY KEY, LAST_UPDATE datetime, timestamp) /* ROW_VERSION rowversion */
GO

/* Use these statements to delete the triggers to allow reinsert */
/*
drop trigger LOG_COMMIT_TEST_INSERT
drop trigger LOG_COMMIT_TEST_UPDATE
drop trigger LOG_COMMIT_TEST_DELETE
*/

/* Create insert, update and delete triggers */
create trigger LOG_COMMIT_TEST_INSERT on COMMIT_TEST for INSERT as
begin
    declare @time datetime
    select @time = getdate()

    insert into COMMIT_TEST_UPDATE (PKEY, LAST_UPDATE)
    select PKEY, getdate()
    from inserted
end
GO

create trigger LOG_COMMIT_TEST_UPDATE on COMMIT_TEST for UPDATE as
begin
    declare @time datetime
    select @time = getdate()

    update COMMIT_TEST_UPDATE
    set LAST_UPDATE = getdate()
    from COMMIT_TEST_UPDATE, deleted, inserted
    where COMMIT_TEST_UPDATE.PKEY = deleted.PKEY
end
GO

/* In our application deletes should never occur, so we don't log when they get modified; we just delete them from the UPDATE table */
create trigger LOG_COMMIT_TEST_DELETE on COMMIT_TEST for DELETE as
begin
    if (select count(*) from deleted) > 0
    begin
        delete COMMIT_TEST_UPDATE
        from COMMIT_TEST_UPDATE, deleted
        where COMMIT_TEST_UPDATE.PKEY = deleted.PKEY
    end
end
GO

/* Delete any previously inserted record to avoid errors when inserting */
DELETE COMMIT_TEST WHERE PKEY = 1
GO

/* What is the current date/time? */
SELECT GETDATE()
GO

BEGIN TRANSACTION
GO
/* Insert a record into the primary table */
INSERT COMMIT_TEST (PKEY) VALUES (1)
GO
/* Simulate additional processing within this transaction */
WAITFOR DELAY '00:00:10'
GO
/* We expect at this point that the date is written to the database (or at least we need some way for this to happen) */
COMMIT TRANSACTION
GO

/* Get the current date to show what date/time should have been committed to the database */
SELECT GETDATE()
GO

/* Selecting from the tables, we see that the timestamp is 10 seconds older than the commit; in other words it was evaluated at
   the insert statement, even though the row could not be read with a SELECT as it was uncommitted */
SELECT * FROM COMMIT_TEST
GO
SELECT * FROM COMMIT_TEST_UPDATE
Any help would be appreciated. We understand we could make changes to the application/database to approximate what we need, but all the solutions we have identified suffer from possible performance issues, or could still lead to missing deals (assuming the commit time is larger than some artificial time window).
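One direction worth considering, offered as a sketch rather than a confirmed fix (the commented-out ROW_VERSION columns in the repro hint at it): track changes with a rowversion column and bound each poll with MIN_ACTIVE_ROWVERSION() (SQL 2005+). The rowversion is still assigned at statement time, but MIN_ACTIVE_ROWVERSION() never passes a value generated by a still-open transaction, so the reader cannot skip an uncommitted insert:

    -- e.g. ALTER TABLE dbo.COMMIT_TEST ADD ROW_VERSION rowversion

    DECLARE @LastSyncVersion binary(8)       -- loaded from wherever the previous poll stored it
    DECLARE @ceiling binary(8)
    SET @ceiling = MIN_ACTIVE_ROWVERSION()   -- safe upper bound for this poll

    SELECT *
    FROM   dbo.COMMIT_TEST
    WHERE  ROW_VERSION >= ISNULL(@LastSyncVersion, 0x0)
      AND  ROW_VERSION <  @ceiling

    -- persist @ceiling as @LastSyncVersion for the next poll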
I tried to port 10,000 records using DTS. After porting 9,900 records I got an error and it exited without any result, but I want to keep the records that were ported before the error occurred. Please help me.
On my site users can register using the ASP.NET Membership CreateUserWizard control. I am also using the wizard control to design a simple question-and-answer form that logged-in users have access to. It has 2 questions, with a text box for Q1 and a dropdown list for Q2. I have a table in my database called "Players" which has 3 columns: UserId (primary key, of type uniqueidentifier), PlayerName (type string), PlayerGenre (type string).
On completing the wizard and clicking the finish button, I want the data to be inserted into the SQL Express Players table. I am having problems getting this to work and keep getting exceptions. It would be very helpful if somebody could check the code and advise where the problem is.
To match the answers to the user, I get the UserId and insert it into the database too:

protected void Wizard1_FinishButtonClick(object sender, WizardNavigationEventArgs e)
{
    SqlDataSource DataSource = (SqlDataSource)Wizard1.FindControl("InsertArtist1");
    MembershipUser myUser = Membership.GetUser(this.User.Identity.Name);
    Guid UserId = (Guid)myUser.ProviderUserKey;
    String Gender = ((DropDownList)Wizard1.FindControl("PlayerGenre")).SelectedValue;
    DataSource.InsertParameters.Add("UserId", UserId.ToString());
    DataSource.InsertParameters.Add("PlayerGenre", Gender.ToString());
    DataSource.Insert();
}
I have an application that generates a lot of rows, from 1 million to 2 million, and I want to insert these records into MS SQL Server in a fast way.
Currently I loop through the records while they are loaded in a dataset, building command text that generates an insert query for each row, and run it against SQL Server.
But it takes a lot of time to finish. Is there a way to bulk insert this data?
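A hedged sketch of the T-SQL route: have the application write the dataset out as a delimited file, then load it server-side in one minimally logged operation (the path, delimiters and table name below are illustrative):

    BULK INSERT dbo.TargetTable
    FROM 'C:\exports\rows.txt'       -- file produced by the application
    WITH (
        FIELDTERMINATOR = ',',       -- column separator in the file
        ROWTERMINATOR   = '\n',      -- row separator
        BATCHSIZE       = 50000,     -- commit every 50k rows to keep the log in check
        TABLOCK                      -- enables minimal logging under bulk-logged/simple recovery
    )

Alternatively, from .NET 2.0 onward the SqlBulkCopy class can stream the rows straight from the DataSet without an intermediate file; either way beats a million single-row INSERT statements.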